import numpy as np
import scipy
import scipy.stats
import torch as t
import matplotlib.pyplot as plt
from IPython.display import clear_output, display
from torch.nn import Sequential, Linear, ReLU, LeakyReLU, Dropout, Sigmoid
%matplotlib inline
device = t.device('cpu')  # override the device choice made above
angle = np.random.uniform(-np.pi,np.pi,(1000,1)).astype('float32')
data = np.concatenate((np.cos(angle), np.sin(angle)),axis=1)
plt.scatter(data[:,0], data[:,1])
discriminator = Sequential(
    Linear(2, 50),
    LeakyReLU(0.2),
    Linear(50, 1),
    Sigmoid()
)  # dummy discriminator: please substitute your own implementation
generator = Sequential(
    Linear(2, 2000),
    LeakyReLU(0.1),
    Linear(2000, 1000),
    LeakyReLU(0.1),
    Linear(1000, 500),
    LeakyReLU(0.1),
    Linear(500, 200),
    LeakyReLU(0.1),
    Linear(200, 100),
    LeakyReLU(0.1),
    Linear(100, 100),
    LeakyReLU(0.1),
    Linear(100, 50),
    LeakyReLU(0.1),
    Linear(50, 2),
    LeakyReLU(0.1),
)  # dummy generator: please substitute your own implementation
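# The dummy networks above are placeholders; as one possible substitution
# (hypothetical layer sizes, not from the notebook), a much smaller generator
# with a plain linear output layer is usually enough for this 2-D task:
generator_small = Sequential(
    Linear(2, 128),
    LeakyReLU(0.1),
    Linear(128, 128),
    LeakyReLU(0.1),
    Linear(128, 2),  # no final activation, so outputs can cover the whole plane
)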
discriminator = discriminator.to(device)
generator = generator.to(device)
d_optimizer = t.optim.Adam(discriminator.parameters(), lr=0.001)
g_optimizer = t.optim.Adam(generator.parameters(), lr=0.0005)
loss = t.nn.BCELoss()
def real_data_target(size):
    '''
    Tensor of ones with shape (size, 1): target labels for real samples
    '''
    data = t.ones(size, 1, device=device)
    return data
def fake_data_target(size):
    '''
    Tensor of zeros with shape (size, 1): target labels for fake samples
    '''
    data = t.zeros(size, 1, device=device)
    return data
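# Illustration only (made-up discriminator outputs): how the targets above
# pair with the BCE loss defined earlier.
example_pred = t.tensor([[0.9], [0.2]])
print(loss(example_pred, real_data_target(2)))  # penalizes predictions far from 1
print(loss(example_pred, fake_data_target(2)))  # penalizes predictions far from 0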
def train_discriminator(optimizer, real_data, fake_data):
    # Reset gradients
    optimizer.zero_grad()

    # 1.1 Train on real data
    prediction_real = discriminator(real_data)
    # Calculate error and backpropagate
    error_real = loss(prediction_real, real_data_target(real_data.size(0)))
    error_real.backward()

    # 1.2 Train on fake data
    prediction_fake = discriminator(fake_data)
    # Calculate error and backpropagate
    error_fake = loss(prediction_fake, fake_data_target(fake_data.size(0)))
    error_fake.backward()

    # 1.3 Update weights with gradients
    optimizer.step()

    # Return error
    return error_real + error_fake
def train_generator(optimizer, fake_data):
    # 2. Train Generator
    # Reset gradients
    optimizer.zero_grad()
    # Run the discriminator on the generated (fake) data
    prediction = discriminator(fake_data)
    # Calculate error and backpropagate: the generator wants the
    # discriminator to label its samples as real
    error = loss(prediction, real_data_target(prediction.size(0)))
    error.backward()
    # Update weights with gradients
    optimizer.step()
    # Return error
    return error
def show(noise, fake, real):
    fig, axs = plt.subplots(1, 3, figsize=(12, 4))
    axs[0].set_title("noise")
    axs[1].set_title("fake")
    axs[2].set_title("real")
    axs[0].scatter(noise.detach().cpu().numpy()[:, 0], noise.detach().cpu().numpy()[:, 1], color='gray')
    axs[1].scatter(fake.detach().cpu().numpy()[:, 0], fake.detach().cpu().numpy()[:, 1], color='red')
    axs[2].scatter(real.detach().cpu().numpy()[:, 0], real.detach().cpu().numpy()[:, 1], color='green')
    fig.tight_layout()
    plt.show()
num_epochs = 20000
sample_size = 500
for epoch in range(num_epochs):
    # real data: points on the unit circle
    angle = np.random.uniform(-np.pi, np.pi, (sample_size, 1)).astype('float32')
    data = np.concatenate((np.cos(angle), np.sin(angle)), axis=1)
    real_data = t.from_numpy(data).to(device)
    # train discriminator:
    noise = t.empty(sample_size, 2, device=device).uniform_(-1, 1)
    fake_data = generator(noise)  #.detach()
    d_error = train_discriminator(d_optimizer, real_data, fake_data)
    # train generator - try with .detach() and without
    noise = t.empty(sample_size, 2, device=device).uniform_(-1, 1)
    fake_data = generator(noise)  #.detach()
    g_error = train_generator(g_optimizer, fake_data)
    if epoch % 100 == 0:
        clear_output(wait=True)  # clear the previous plot (uses the clear_output imported above)
        show(noise, fake_data, real_data)
        print(f"Epoch: {epoch} G-error: {g_error.item():.4f} D-error: {d_error.item():.4f}")
# Single training step on one minibatch (template); real_batch is one
# minibatch of real samples, drawn here from the same circle distribution
angle = np.random.uniform(-np.pi, np.pi, (sample_size, 1)).astype('float32')
real_batch = t.from_numpy(np.concatenate((np.cos(angle), np.sin(angle)), axis=1)).to(device)
batch_size = real_batch.shape[0]
# 1. Train Discriminator
real_data = real_batch
# Generate fake data (noise must match the generator's 2-D input)
noise = t.empty(batch_size, 2, device=device).uniform_(-1, 1)
fake_data = generator(noise).detach()
# Train D
d_error = train_discriminator(d_optimizer, real_data, fake_data)
# 2. Train Generator
# Generate fake data
noise = t.empty(batch_size, 2, device=device).uniform_(-1, 1)
fake_data = generator(noise)
# Train G
g_error = train_generator(g_optimizer, fake_data)
# Log error: visualize the final generator output against fresh real samples
angle = np.random.uniform(-np.pi, np.pi, (sample_size, 1)).astype('float32')
data = np.concatenate((np.cos(angle), np.sin(angle)), axis=1)
real_data = t.from_numpy(data).to(device)
# fake data:
noise = t.empty(sample_size, 2, device=device).uniform_(-1, 1)
fake_data = generator(noise).detach()
show(noise, fake_data, real_data)
Implement the GAN training loop that will train the GAN to generate samples from the sampling distribution.
Use another sampling distribution, one that is not concentrated on a line, e.g. an ellipse (see the sketch below).
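As a starting point for the second exercise, here is a minimal sketch of one reading of it: points spread uniformly over a filled ellipse rather than along a curve. The semi-axes a=2, b=1 and the helper name sample_ellipse are assumptions for illustration; the models and training loop above can be reused unchanged with this sampler.
# Sample points uniformly from a filled ellipse with semi-axes a and b
def sample_ellipse(n, a=2.0, b=1.0):
    angle = np.random.uniform(-np.pi, np.pi, (n, 1)).astype('float32')
    r = np.sqrt(np.random.uniform(0, 1, (n, 1)).astype('float32'))  # sqrt gives uniform density over the area
    return np.concatenate((a * r * np.cos(angle), b * r * np.sin(angle)), axis=1)

data = sample_ellipse(1000)
plt.scatter(data[:, 0], data[:, 1])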